#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
+#include <xen/domain.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/delay.h>
#include <xen/kernel.h>
#include <xen/lib.h>
#include <xen/mm.h>
+#include <xen/domain.h>
#include <xen/sched.h>
#include <xen/errno.h>
#include <xen/perfc.h>
#include <xen/init.h>
#include <xen/kernel.h>
#include <xen/mm.h>
+#include <xen/domain.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/delay.h>
struct domain *dom0;
+/*
+ * alloc_domain: allocate and zero-initialise a new domain structure
+ * for @domid.  Returns NULL on allocation failure.  The caller owns
+ * the returned structure and releases it with free_domain().
+ */
+struct domain *alloc_domain(domid_t domid)
+{
+ struct domain *d;
+
+ if ( (d = xmalloc(struct domain)) == NULL )
+ return NULL;
+
+ memset(d, 0, sizeof(*d));
+ d->domain_id = domid;
+ /* Initial reference, dropped later via put_domain(). */
+ atomic_set(&d->refcnt, 1);
+ spin_lock_init(&d->big_lock);
+ spin_lock_init(&d->page_alloc_lock);
+ INIT_LIST_HEAD(&d->page_list);
+ INIT_LIST_HEAD(&d->xenpage_list);
+
+ return d;
+}
+
+
+/*
+ * free_domain: final teardown of a domain structure allocated with
+ * alloc_domain().  Destroys scheduler state first, then frees every
+ * allocated VCPU structure, and finally the domain itself.
+ */
+void free_domain(struct domain *d)
+{
+ struct vcpu *v;
+ int i;
+
+ sched_destroy_domain(d);
+
+ /* Walk backwards so vcpu[0] (the boot VCPU) is released last. */
+ for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
+ if ( (v = d->vcpu[i]) != NULL )
+ free_vcpu_struct(v);
+
+ xfree(d);
+}
+
+
+/*
+ * alloc_vcpu: allocate and initialise VCPU @vcpu_id of domain @d,
+ * initially placed on physical CPU @cpu_id, and register it with the
+ * scheduler.  Returns NULL on allocation or scheduler-init failure.
+ */
+struct vcpu *alloc_vcpu(
+ struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
+{
+ struct vcpu *v;
+
+ BUG_ON(d->vcpu[vcpu_id] != NULL);
+
+ if ( (v = alloc_vcpu_struct(d, vcpu_id)) == NULL )
+ return NULL;
+
+ v->domain = d;
+ v->vcpu_id = vcpu_id;
+ v->processor = cpu_id;
+ atomic_set(&v->pausecnt, 0);
+ v->vcpu_info = &d->shared_info->vcpu_info[vcpu_id];
+
+ /* Idle VCPUs are pinned to their CPU; guest VCPUs may run anywhere. */
+ v->cpu_affinity = is_idle_domain(d) ?
+ cpumask_of_cpu(cpu_id) : CPU_MASK_ALL;
+
+ v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
+ v->runstate.state_entry_time = NOW();
+
+ /* Secondary guest VCPUs start offline until explicitly brought up. */
+ if ( (vcpu_id != 0) && !is_idle_domain(d) )
+ set_bit(_VCPUF_down, &v->vcpu_flags);
+
+ if ( sched_init_vcpu(v) < 0 )
+ {
+ free_vcpu_struct(v);
+ return NULL;
+ }
+
+ d->vcpu[vcpu_id] = v;
+ /* NOTE(review): assumes VCPUs are created in ascending vcpu_id order,
+ * so vcpu[vcpu_id-1] is already populated — confirm at call sites. */
+ if ( vcpu_id != 0 )
+ d->vcpu[v->vcpu_id-1]->next_in_list = v;
+
+ return v;
+}
+
+
struct domain *domain_create(domid_t domid, unsigned int cpu)
{
struct domain *d, **pd;
void domain_kill(struct domain *d)
{
- struct vcpu *v;
-
domain_pause(d);
- if ( !test_and_set_bit(_DOMF_dying, &d->domain_flags) )
- {
- for_each_vcpu(d, v)
- sched_rem_domain(v);
- gnttab_release_mappings(d);
- domain_relinquish_resources(d);
- put_domain(d);
- send_guest_global_virq(dom0, VIRQ_DOM_EXC);
- }
+ /* Only the first caller to set _DOMF_dying performs the teardown. */
+ if ( test_and_set_bit(_DOMF_dying, &d->domain_flags) )
+ return;
+
+ gnttab_release_mappings(d);
+ domain_relinquish_resources(d);
+ /* Drop a reference; the last one invokes domain_destroy() (see put_domain). */
+ put_domain(d);
+
+ send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
}
/**
- * bvt_alloc_task - allocate BVT private structures for a task
- * @p: task to allocate private structures for
- *
+ * bvt_init_vcpu - allocate BVT private structures for a VCPU.
* Returns non-zero on failure.
*/
-static int bvt_alloc_task(struct vcpu *v)
+static int bvt_init_vcpu(struct vcpu *v)
{
struct domain *d = v->domain;
struct bvt_dom_info *inf;
+ struct bvt_vcpu_info *einf;
if ( (d->sched_priv == NULL) )
{
init_timer(&inf->unwarp_timer, unwarp_timer_fn, inf, v->processor);
}
- return 0;
-}
-
-/*
- * Add and remove a domain
- */
-static void bvt_add_task(struct vcpu *v)
-{
- struct bvt_vcpu_info *einf = EBVT_INFO(v);
+ einf = EBVT_INFO(v);
/* Allocate per-CPU context if this is the first domain to be added. */
if ( CPU_INFO(v->processor) == NULL )
einf->avt = einf->evt = ~0U;
BUG_ON(__task_on_runqueue(v));
__add_to_runqueue_head(v);
- }
+ }
else
{
/* Set avt and evt to system virtual time. */
einf->avt = CPU_SVT(v->processor);
einf->evt = CPU_SVT(v->processor);
}
+
+ return 0;
}
static void bvt_wake(struct vcpu *v)
/**
- * bvt_free_task - free BVT private structures for a task
- * @d: task
+ * bvt_destroy_domain - free BVT private structures for a domain.
*/
-static void bvt_free_task(struct domain *d)
+static void bvt_destroy_domain(struct domain *d)
{
struct bvt_dom_info *inf = BVT_INFO(d);
.name = "Borrowed Virtual Time",
.opt_name = "bvt",
.sched_id = SCHED_BVT,
-
- .alloc_task = bvt_alloc_task,
- .add_task = bvt_add_task,
- .free_task = bvt_free_task,
+
+ .init_vcpu = bvt_init_vcpu,
+ .destroy_domain = bvt_destroy_domain,
+
.do_schedule = bvt_do_schedule,
.control = bvt_ctl,
.adjdom = bvt_adjdom,
} while ( 0 );
#define CSCHED_STATS_EXPAND_SCHED(_MACRO) \
- _MACRO(vcpu_alloc) \
- _MACRO(vcpu_add) \
+ _MACRO(vcpu_init) \
_MACRO(vcpu_sleep) \
_MACRO(vcpu_wake_running) \
_MACRO(vcpu_wake_onrunq) \
_MACRO(vcpu_wake_runnable) \
_MACRO(vcpu_wake_not_runnable) \
- _MACRO(dom_free) \
+ _MACRO(dom_destroy) \
_MACRO(schedule) \
_MACRO(tickle_local_idler) \
_MACRO(tickle_local_over) \
}
static int
-csched_vcpu_alloc(struct vcpu *vc)
+csched_vcpu_init(struct vcpu *vc)
{
struct domain * const dom = vc->domain;
struct csched_dom *sdom;
struct csched_vcpu *svc;
int16_t pri;
- CSCHED_STAT_CRANK(vcpu_alloc);
+ CSCHED_STAT_CRANK(vcpu_init);
/* Allocate, if appropriate, per-domain info */
if ( is_idle_vcpu(vc) )
if ( likely(sdom != NULL) )
csched_vcpu_acct(svc, 0);
- return 0;
-}
-
-static void
-csched_vcpu_add(struct vcpu *vc)
-{
- CSCHED_STAT_CRANK(vcpu_add);
-
/* Allocate per-PCPU info */
if ( unlikely(!CSCHED_PCPU(vc->processor)) )
csched_pcpu_init(vc->processor);
CSCHED_VCPU_CHECK(vc);
+
+ return 0;
}
static void
}
static void
-csched_dom_free(struct domain *dom)
+csched_dom_destroy(struct domain *dom)
{
struct csched_dom * const sdom = CSCHED_DOM(dom);
int i;
- CSCHED_STAT_CRANK(dom_free);
+ CSCHED_STAT_CRANK(dom_destroy);
for ( i = 0; i < MAX_VIRT_CPUS; i++ )
{
.opt_name = "credit",
.sched_id = SCHED_CREDIT,
- .alloc_task = csched_vcpu_alloc,
- .add_task = csched_vcpu_add,
+ .init_vcpu = csched_vcpu_init,
+ .destroy_domain = csched_dom_destroy,
+
.sleep = csched_vcpu_sleep,
.wake = csched_vcpu_wake,
+
.set_affinity = csched_vcpu_set_affinity,
.adjdom = csched_dom_cntl,
- .free_task = csched_dom_free,
.tick = csched_tick,
.do_schedule = csched_schedule,
}
-/* Allocates memory for per domain private scheduling data*/
-static int sedf_alloc_task(struct vcpu *v)
+static int sedf_init_vcpu(struct vcpu *v)
{
- PRINT(2, "sedf_alloc_task was called, domain-id %i.%i\n",
- v->domain->domain_id, v->vcpu_id);
+ struct sedf_vcpu_info *inf;
if ( v->domain->sched_priv == NULL )
{
if ( (v->sched_priv = xmalloc(struct sedf_vcpu_info)) == NULL )
return -1;
-
memset(v->sched_priv, 0, sizeof(struct sedf_vcpu_info));
- return 0;
-}
-
-
-/* Setup the sedf_dom_info */
-static void sedf_add_task(struct vcpu *v)
-{
- struct sedf_vcpu_info *inf = EDOM_INFO(v);
-
+ inf = EDOM_INFO(v);
inf->vcpu = v;
- PRINT(2,"sedf_add_task was called, domain-id %i.%i\n",
- v->domain->domain_id, v->vcpu_id);
-
/* Allocate per-CPU context if this is the first domain to be added. */
if ( unlikely(schedule_data[v->processor].sched_priv == NULL) )
{
EDOM_INFO(v)->deadl_abs = 0;
EDOM_INFO(v)->status &= ~SEDF_ASLEEP;
}
+
+ return 0;
}
-/* Frees memory used by domain info */
-static void sedf_free_task(struct domain *d)
+static void sedf_destroy_domain(struct domain *d)
{
int i;
- PRINT(2,"sedf_free_task was called, domain-id %i\n",d->domain_id);
-
xfree(d->sched_priv);
for ( i = 0; i < MAX_VIRT_CPUS; i++ )
.opt_name = "sedf",
.sched_id = SCHED_SEDF,
- .alloc_task = sedf_alloc_task,
- .add_task = sedf_add_task,
- .free_task = sedf_free_task,
+ .init_vcpu = sedf_init_vcpu,
+ .destroy_domain = sedf_destroy_domain,
+
.do_schedule = sedf_do_schedule,
.dump_cpu_state = sedf_dump_cpu_state,
.sleep = sedf_sleep,
}
}
-struct domain *alloc_domain(domid_t domid)
-{
- struct domain *d;
-
- if ( (d = xmalloc(struct domain)) == NULL )
- return NULL;
-
- memset(d, 0, sizeof(*d));
- d->domain_id = domid;
- atomic_set(&d->refcnt, 1);
- spin_lock_init(&d->big_lock);
- spin_lock_init(&d->page_alloc_lock);
- INIT_LIST_HEAD(&d->page_list);
- INIT_LIST_HEAD(&d->xenpage_list);
-
- return d;
-}
-
-void free_domain(struct domain *d)
-{
- struct vcpu *v;
- int i;
-
- for_each_vcpu ( d, v )
- sched_rem_domain(v);
-
- SCHED_OP(free_task, d);
-
- for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
- if ( (v = d->vcpu[i]) != NULL )
- free_vcpu_struct(v);
-
- xfree(d);
-}
-
-struct vcpu *alloc_vcpu(
- struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
-{
- struct vcpu *v;
-
- BUG_ON(d->vcpu[vcpu_id] != NULL);
-
- if ( (v = alloc_vcpu_struct(d, vcpu_id)) == NULL )
- return NULL;
-
- v->domain = d;
- v->vcpu_id = vcpu_id;
- v->processor = cpu_id;
- atomic_set(&v->pausecnt, 0);
- v->vcpu_info = &d->shared_info->vcpu_info[vcpu_id];
-
- v->cpu_affinity = is_idle_domain(d) ?
- cpumask_of_cpu(cpu_id) : CPU_MASK_ALL;
-
- v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
- v->runstate.state_entry_time = NOW();
-
- if ( (vcpu_id != 0) && !is_idle_domain(d) )
- set_bit(_VCPUF_down, &v->vcpu_flags);
-
- if ( SCHED_OP(alloc_task, v) < 0 )
- {
- free_vcpu_struct(v);
- return NULL;
- }
-
- d->vcpu[vcpu_id] = v;
- if ( vcpu_id != 0 )
- d->vcpu[v->vcpu_id-1]->next_in_list = v;
-
- sched_add_domain(v);
-
- return v;
-}
-
-void sched_add_domain(struct vcpu *v)
+int sched_init_vcpu(struct vcpu *v)
{
/* Initialise the per-domain timers. */
init_timer(&v->timer, vcpu_timer_fn, v, v->processor);
set_bit(_VCPUF_running, &v->vcpu_flags);
}
- SCHED_OP(add_task, v);
TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);
+
+ return SCHED_OP(init_vcpu, v);
}
-void sched_rem_domain(struct vcpu *v)
+/*
+ * sched_destroy_domain: detach @d from the scheduler.  Stops the
+ * per-VCPU timers for every VCPU, then invokes the per-scheduler
+ * destroy_domain hook to free scheduler-private state.
+ */
+void sched_destroy_domain(struct domain *d)
{
- kill_timer(&v->timer);
- kill_timer(&v->poll_timer);
+ struct vcpu *v;
+
+ for_each_vcpu ( d, v )
+ {
+ kill_timer(&v->timer);
+ kill_timer(&v->poll_timer);
+ TRACE_2D(TRC_SCHED_DOM_REM, v->domain->domain_id, v->vcpu_id);
+ }
- SCHED_OP(rem_task, v);
- TRACE_2D(TRC_SCHED_DOM_REM, v->domain->domain_id, v->vcpu_id);
+ SCHED_OP(destroy_domain, d);
}
void vcpu_sleep_nosync(struct vcpu *v)
/* Initialise the data structures. */
void __init scheduler_init(void)
{
- int i, rc;
+ int i;
open_softirq(SCHEDULE_SOFTIRQ, __enter_scheduler);
printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
SCHED_OP(init);
-
- if ( idle_vcpu[0] != NULL )
- {
- schedule_data[0].curr = idle_vcpu[0];
- schedule_data[0].idle = idle_vcpu[0];
-
- rc = SCHED_OP(alloc_task, idle_vcpu[0]);
- BUG_ON(rc < 0);
-
- sched_add_domain(idle_vcpu[0]);
- }
}
/*
#ifndef __XEN_DOMAIN_H__
#define __XEN_DOMAIN_H__
-extern int boot_vcpu(
+struct vcpu *alloc_vcpu(
+ struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);
+int boot_vcpu(
struct domain *d, int vcpuid, struct vcpu_guest_context *ctxt);
+struct domain *alloc_domain(domid_t domid);
+void free_domain(struct domain *d);
+
/*
* Arch-specifics.
*/
struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id);
-extern void free_vcpu_struct(struct vcpu *v);
+void free_vcpu_struct(struct vcpu *v);
-extern int arch_domain_create(struct domain *d);
+int arch_domain_create(struct domain *d);
-extern void arch_domain_destroy(struct domain *d);
+void arch_domain_destroy(struct domain *d);
-extern int arch_set_info_guest(
- struct vcpu *v, struct vcpu_guest_context *c);
+int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c);
-extern void domain_relinquish_resources(struct domain *d);
+void domain_relinquish_resources(struct domain *d);
-extern void dump_pageframe_info(struct domain *d);
+void dump_pageframe_info(struct domain *d);
-extern void arch_dump_domain_info(struct domain *d);
+void arch_dump_domain_info(struct domain *d);
#endif /* __XEN_DOMAIN_H__ */
void (*init) (void);
void (*tick) (unsigned int cpu);
- int (*alloc_task) (struct vcpu *);
- void (*add_task) (struct vcpu *);
- void (*free_task) (struct domain *);
- void (*rem_task) (struct vcpu *);
+
+ int (*init_vcpu) (struct vcpu *);
+ void (*destroy_domain) (struct domain *);
+
void (*sleep) (struct vcpu *);
void (*wake) (struct vcpu *);
+
int (*set_affinity) (struct vcpu *, cpumask_t *);
+
struct task_slice (*do_schedule) (s_time_t);
+
int (*control) (struct sched_ctl_cmd *);
int (*adjdom) (struct domain *,
struct sched_adjdom_cmd *);
#define is_idle_domain(d) ((d)->domain_id == IDLE_DOMAIN_ID)
#define is_idle_vcpu(v) (is_idle_domain((v)->domain))
-struct vcpu *alloc_vcpu(
- struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);
-
-struct domain *alloc_domain(domid_t domid);
-void free_domain(struct domain *d);
-
#define DOMAIN_DESTROYED (1<<31) /* assumes atomic_t is >= 32 bits */
#define put_domain(_d) \
if ( atomic_dec_and_test(&(_d)->refcnt) ) domain_destroy(_d)
#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);
void schedulers_start(void);
-void sched_add_domain(struct vcpu *);
-void sched_rem_domain(struct vcpu *);
+int sched_init_vcpu(struct vcpu *);
+void sched_destroy_domain(struct domain *);
long sched_ctl(struct sched_ctl_cmd *);
long sched_adjdom(struct sched_adjdom_cmd *);
int sched_id(void);